In [1]:
%config Completer.use_jedi = False
import tensorflow as tf
from tensorflow.keras import datasets, layers, models
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from tensorflow.keras import layers, losses
import matplotlib.pyplot as plt
import numpy as np
import os
import logging
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
import tensorflow
import tensorflow_probability as tfp
import plotly.express as px
import pandas as pd
In [2]:
(train_images, train_labels), (test_images, test_labels) = datasets.cifar10.load_data()

# Normalize pixel values to be between 0 and 1
train_images, test_images = train_images / 255.0, test_images / 255.0
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
In [3]:
train_images.shape
Out[3]:
(50000, 32, 32, 3)
In [4]:
channel = 2
In [5]:
latent_dim = 1024

class Autoencoder(Model):
    def __init__(self, latent_dim):
        super(Autoencoder, self).__init__()
        self.latent_dim = latent_dim
        self.encoder = tf.keras.Sequential([
            layers.Flatten(),
            layers.Dense(latent_dim, activation='relu'),
        ])
        self.decoder = tf.keras.Sequential([
            layers.Dense(32*32, activation='sigmoid'),
            layers.Reshape((32, 32))
        ])

    def call(self, x):
        encoded = self.encoder(x)
        decoded = self.decoder(encoded)
        return decoded


autoencoder = Autoencoder(latent_dim)
autoencoder.compile(optimizer='adam', loss=losses.MeanSquaredError())
In [6]:
# model_dir = f'autoencoder_latent_dim_{latent_dim}_dataset_CIFAR10'
# if os.path.exists(model_dir):
#     print('loading saved model')
#     tf.saved_model.load(export_dir=model_dir)
# else:
#     print('training model')
    
#     autoencoder.fit(train_images[:,:,:,channel], train_images[:,:,:,channel],
#                 epochs=10,
#                 shuffle=True,
#                 validation_data=(test_images[:,:,:,channel], test_images[:,:,:,channel]))
#     print('saving model')
#     tf.saved_model.save(obj=autoencoder,export_dir=model_dir)
autoencoder.fit(train_images[:,:,:,channel], train_images[:,:,:,channel],
                epochs=10,
                shuffle=True,
                validation_data=(test_images[:,:,:,channel], test_images[:,:,:,channel]))
Epoch 1/10
1563/1563 [==============================] - 14s 9ms/step - loss: 0.0250 - val_loss: 0.0064
Epoch 2/10
1563/1563 [==============================] - 14s 9ms/step - loss: 0.0059 - val_loss: 0.0046
Epoch 3/10
1563/1563 [==============================] - 13s 9ms/step - loss: 0.0044 - val_loss: 0.0037
Epoch 4/10
1563/1563 [==============================] - 13s 8ms/step - loss: 0.0036 - val_loss: 0.0032
Epoch 5/10
1563/1563 [==============================] - 13s 8ms/step - loss: 0.0031 - val_loss: 0.0028
Epoch 6/10
1563/1563 [==============================] - 13s 9ms/step - loss: 0.0028 - val_loss: 0.0027
Epoch 7/10
1563/1563 [==============================] - 13s 8ms/step - loss: 0.0026 - val_loss: 0.0023
Epoch 8/10
1563/1563 [==============================] - 13s 8ms/step - loss: 0.0024 - val_loss: 0.0025
Epoch 9/10
1563/1563 [==============================] - 13s 8ms/step - loss: 0.0022 - val_loss: 0.0022
Epoch 10/10
1563/1563 [==============================] - 13s 8ms/step - loss: 0.0021 - val_loss: 0.0022
Out[6]:
<tensorflow.python.keras.callbacks.History at 0x7f8d630dd4a8>
In [7]:
# tf.saved_model.save(obj=autoencoder,export_dir=model_dir)
In [8]:
z = autoencoder.encoder(test_images[:,:,:,channel]).numpy()
decoded_imgs = autoencoder.decoder(z).numpy()
In [9]:
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(test_images[i,:,:,channel])
    plt.title("original")
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i])
    plt.title("reconstructed")
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
In [10]:
def fix_non_positive_definite_matrix(A,eps=0.01):
    logger = logging.getLogger('fix_non_positive_definite_matrix')
    e,v = tf.linalg.eigh(A)
    e_fix = e * tf.cast(tf.greater(e,0),dtype=tf.float32)+eps *tf.cast(tf.less_equal(e,0),dtype=tf.float32)
    A_fix = tf.matmul(v,tf.matmul(tf.linalg.diag(e_fix),tf.transpose(v)))
    A_norm = tf.norm(A)
    A_fix_norm = tf.norm(A_fix)
    corr_err = tf.math.round(1.0*tf.math.abs(A_norm-A_fix_norm)/A_norm,10)
    logger.info(f'Correction error : {corr_err}')
    logger.info(f'VV^T after fixing = {tf.linalg.diag_part(tf.matmul(v,tf.transpose(v)))}')
    return A_fix
In [11]:
def calc_z_likelihood(z,raxis = 0):
    logger = logging.getLogger('calc_z_likelihood')
    sample_mean = tf.reduce_max(input_tensor=z,axis=raxis)
    sample_cov = tfp.stats.covariance(x=z)
    sample_cov_fixed  = fix_non_positive_definite_matrix(sample_cov)
    
    e,v = tf.linalg.eigh(tensor=sample_cov_fixed)
    logger.info(f'Eigenvalues for sample covariance marix : shape =  {e.shape} , values = {e}')

    L = tf.linalg.cholesky(input=sample_cov_fixed)
    
    sample_var = tf.linalg.diag_part(input=sample_cov)+0.1
#     logger.debug(f'sample_mean = {sample_mean.shape}')
#     logger.debug(f'sample_cov = {sample_cov.shape}')
    logger.debug(f'sample_var = {sample_var}')
    mvn_diag = tfp.distributions.MultivariateNormalDiag(loc=sample_mean,scale_diag=sample_var)
    mvn_tri = tfp.distributions.MultivariateNormalTriL(loc=sample_mean,scale_tril=L)
    log_p_diag = tf.reduce_sum(mvn_diag.log_prob(z))
    log_p_tri = tf.reduce_sum(mvn_tri.log_prob(z))
    logger.debug(f' log_p_tri = {log_p_tri}')
    logger.debug(f' log_p_diag = {log_p_diag}')
    logger.debug(f'Ratio log_p_tri / log_p_diag = {np.round(log_p_tri/log_p_diag,3)}')
In [13]:
###### unique_labels = np.unique(train_labels)

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('Main')
############
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('Main')
auto_encoders = dict()
train_labels2 = np.transpose(train_labels)[0]
test_labels2 = np.transpose(test_labels)[0]
unique_labels = np.unique(train_labels2)
for label in unique_labels:
    train_idx = train_labels2==label
    test_idx = test_labels2 == label
    
    train_images_label = train_images[train_idx,:,:,channel]
    test_images_label = test_images[test_idx,:,:,channel]
    
#     print(class_names[label])
#     plt.imshow(train_images_label[0,:,:])
#     plt.show()
    logger.info(f'Training autoencoder for label {label} with name {class_names[label]}')
    auto_encoders[label] = autoencoder = Autoencoder(latent_dim)
    auto_encoders[label].compile(optimizer='adam', loss=losses.MeanSquaredError())
    auto_encoders[label].fit(train_images_label, train_images_label,
                epochs=10,
                shuffle=True,
                validation_data=(test_images_label, test_images_label))
    
    z = auto_encoders[label].encoder(test_images_label)
    if z.shape[0] >2:
        logger.info('Apply PCA for 2D plot of Z')
        s = StandardScaler()
        z_scaled = s.fit(z)
        pca = PCA(n_components=2)
        z_pca = pca.fit_transform(z)
        z1 = z_pca[:,0]
        z2 = z_pca[:,1]
    elif z.shape[0] == 2:
        z1 = z[:,0].numpy()
        z2 = z[:,1].numpy()
    else:
        raise ValueError('Z shape must be >=2')
#     plt.hist2d(z1, z2, bins=(50, 50), cmap=plt.cm.jet)
#     plt.show()
    
    fig = px.density_contour(x=pd.Series(z1), y=pd.Series(z2), marginal_x="histogram", marginal_y="histogram")
    fig.show()
    
    logger.info(f'Z shape of auto_encoder_{class_names[label]} = {z.shape}')
    try:
        calc_z_likelihood(z=z)
    except Exception as e:
        print(e)
        continue
INFO:Main:Training autoencoder for label 0 with name airplane
Epoch 1/10
157/157 [==============================] - 2s 10ms/step - loss: 0.0435 - val_loss: 0.0249
Epoch 2/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0231 - val_loss: 0.0186
Epoch 3/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0178 - val_loss: 0.0180
Epoch 4/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0144 - val_loss: 0.0136
Epoch 5/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0109 - val_loss: 0.0109
Epoch 6/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0099 - val_loss: 0.0157
Epoch 7/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0091 - val_loss: 0.0093
Epoch 8/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0082 - val_loss: 0.0071
Epoch 9/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0074 - val_loss: 0.0077
Epoch 10/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0066 - val_loss: 0.0071
INFO:Main:Apply PCA for 2D plot of Z
INFO:Main:Z shape of auto_encoder_airplane = (1000, 1024)
INFO:fix_non_positive_definite_matrix:Correction error : 0.0
INFO:fix_non_positive_definite_matrix:VV^T after fixing = [1.0000054 1.000006  1.0000058 ... 1.0000017 1.        1.0000067]
INFO:calc_z_likelihood:Eigenvalues for sample covariance marix : shape =  (1024,) , values = [-2.7242477e-08 -2.1036181e-08 -8.7848271e-09 ...  9.1265230e+00
  1.2165938e+01  3.4936604e+01]
INFO:Main:Training autoencoder for label 1 with name automobile
Cholesky decomposition was not successful. The input might not be valid. [Op:Cholesky]
Epoch 1/10
157/157 [==============================] - 2s 10ms/step - loss: 0.0504 - val_loss: 0.0265
Epoch 2/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0240 - val_loss: 0.0203
Epoch 3/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0166 - val_loss: 0.0137
Epoch 4/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0129 - val_loss: 0.0134
Epoch 5/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0111 - val_loss: 0.0097
Epoch 6/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0092 - val_loss: 0.0092
Epoch 7/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0086 - val_loss: 0.0085
Epoch 8/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0080 - val_loss: 0.0077
Epoch 9/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0072 - val_loss: 0.0074
Epoch 10/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0067 - val_loss: 0.0081
INFO:Main:Apply PCA for 2D plot of Z
INFO:Main:Z shape of auto_encoder_automobile = (1000, 1024)
INFO:fix_non_positive_definite_matrix:Correction error : 0.0
INFO:fix_non_positive_definite_matrix:VV^T after fixing = [1.        1.000016  1.0000002 ... 1.0000021 1.00001   1.0000091]
INFO:calc_z_likelihood:Eigenvalues for sample covariance marix : shape =  (1024,) , values = [-5.7409551e-08 -3.1989355e-08 -2.1886828e-08 ...  8.9574318e+00
  9.7429266e+00  4.2977600e+01]
INFO:Main:Training autoencoder for label 2 with name bird
Cholesky decomposition was not successful. The input might not be valid. [Op:Cholesky]
Epoch 1/10
157/157 [==============================] - 2s 9ms/step - loss: 0.0457 - val_loss: 0.0277
Epoch 2/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0252 - val_loss: 0.0160
Epoch 3/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0142 - val_loss: 0.0110
Epoch 4/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0103 - val_loss: 0.0116
Epoch 5/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0084 - val_loss: 0.0094
Epoch 6/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0074 - val_loss: 0.0073
Epoch 7/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0069 - val_loss: 0.0067
Epoch 8/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0061 - val_loss: 0.0062
Epoch 9/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0061 - val_loss: 0.0062
Epoch 10/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0059 - val_loss: 0.0067
INFO:Main:Apply PCA for 2D plot of Z
INFO:Main:Z shape of auto_encoder_bird = (1000, 1024)
INFO:fix_non_positive_definite_matrix:Correction error : 0.0
INFO:fix_non_positive_definite_matrix:VV^T after fixing = [1.        1.0000011 1.0000055 ... 1.        1.        1.       ]
INFO:calc_z_likelihood:Eigenvalues for sample covariance marix : shape =  (1024,) , values = [-1.09595213e-07 -1.39828975e-08 -1.35624418e-08 ...  4.62063360e+00
  5.75007820e+00  5.15804825e+01]
INFO:Main:Training autoencoder for label 3 with name cat
Cholesky decomposition was not successful. The input might not be valid. [Op:Cholesky]
Epoch 1/10
157/157 [==============================] - 2s 9ms/step - loss: 0.0509 - val_loss: 0.0274
Epoch 2/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0248 - val_loss: 0.0167
Epoch 3/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0153 - val_loss: 0.0111
Epoch 4/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0106 - val_loss: 0.0107
Epoch 5/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0087 - val_loss: 0.0082
Epoch 6/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0082 - val_loss: 0.0076
Epoch 7/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0077 - val_loss: 0.0071
Epoch 8/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0064 - val_loss: 0.0064
Epoch 9/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0059 - val_loss: 0.0061
Epoch 10/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0056 - val_loss: 0.0058
INFO:Main:Apply PCA for 2D plot of Z
INFO:Main:Z shape of auto_encoder_cat = (1000, 1024)
INFO:fix_non_positive_definite_matrix:Correction error : 0.0
INFO:fix_non_positive_definite_matrix:VV^T after fixing = [1.0000079 1.0000086 1.000001  ... 1.0000094 1.        1.0000083]
INFO:calc_z_likelihood:Eigenvalues for sample covariance marix : shape =  (1024,) , values = [-7.5469035e-09 -7.5104607e-09 -7.4855722e-09 ...  7.7614527e+00
  1.0501882e+01  6.2977661e+01]
INFO:Main:Training autoencoder for label 4 with name deer
Cholesky decomposition was not successful. The input might not be valid. [Op:Cholesky]
Epoch 1/10
157/157 [==============================] - 2s 9ms/step - loss: 0.0407 - val_loss: 0.0241
Epoch 2/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0225 - val_loss: 0.0151
Epoch 3/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0132 - val_loss: 0.0083
Epoch 4/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0089 - val_loss: 0.0077
Epoch 5/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0073 - val_loss: 0.0069
Epoch 6/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0064 - val_loss: 0.0062
Epoch 7/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0058 - val_loss: 0.0056
Epoch 8/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0056 - val_loss: 0.0053
Epoch 9/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0049 - val_loss: 0.0054
Epoch 10/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0048 - val_loss: 0.0048
INFO:Main:Apply PCA for 2D plot of Z
INFO:Main:Z shape of auto_encoder_deer = (1000, 1024)
INFO:fix_non_positive_definite_matrix:Correction error : 0.0
INFO:fix_non_positive_definite_matrix:VV^T after fixing = [1.        1.        1.        ... 1.0000074 1.        1.0000069]
INFO:calc_z_likelihood:Eigenvalues for sample covariance marix : shape =  (1024,) , values = [-5.7200950e-08 -1.1932415e-08 -7.9461113e-09 ...  3.5285962e+00
  7.5383816e+00  3.5520523e+01]
INFO:Main:Training autoencoder for label 5 with name dog
Cholesky decomposition was not successful. The input might not be valid. [Op:Cholesky]
Epoch 1/10
157/157 [==============================] - 2s 8ms/step - loss: 0.0471 - val_loss: 0.0276
Epoch 2/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0247 - val_loss: 0.0181
Epoch 3/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0155 - val_loss: 0.0122
Epoch 4/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0114 - val_loss: 0.0123
Epoch 5/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0093 - val_loss: 0.0086
Epoch 6/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0075 - val_loss: 0.0071
Epoch 7/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0066 - val_loss: 0.0070
Epoch 8/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0062 - val_loss: 0.0062
Epoch 9/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0057 - val_loss: 0.0059
Epoch 10/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0057 - val_loss: 0.0084
INFO:Main:Apply PCA for 2D plot of Z
INFO:Main:Z shape of auto_encoder_dog = (1000, 1024)
INFO:fix_non_positive_definite_matrix:Correction error : 0.0
INFO:fix_non_positive_definite_matrix:VV^T after fixing = [1.0000088 1.0000106 1.0000001 ... 1.0000114 1.        1.0000072]
INFO:calc_z_likelihood:Eigenvalues for sample covariance marix : shape =  (1024,) , values = [-4.0609915e-08 -1.1772138e-08 -9.1254320e-09 ...  8.2653561e+00
  1.3551906e+01  3.3936813e+01]
INFO:Main:Training autoencoder for label 6 with name frog
Cholesky decomposition was not successful. The input might not be valid. [Op:Cholesky]
Epoch 1/10
157/157 [==============================] - 2s 9ms/step - loss: 0.0460 - val_loss: 0.0268
Epoch 2/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0260 - val_loss: 0.0160
Epoch 3/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0135 - val_loss: 0.0198
Epoch 4/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0109 - val_loss: 0.0092
Epoch 5/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0085 - val_loss: 0.0081
Epoch 6/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0076 - val_loss: 0.0104
Epoch 7/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0076 - val_loss: 0.0070
Epoch 8/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0069 - val_loss: 0.0065
Epoch 9/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0061 - val_loss: 0.0070
Epoch 10/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0057 - val_loss: 0.0065
INFO:Main:Apply PCA for 2D plot of Z
INFO:Main:Z shape of auto_encoder_frog = (1000, 1024)
INFO:fix_non_positive_definite_matrix:Correction error : 0.0
INFO:fix_non_positive_definite_matrix:VV^T after fixing = [1.0000312 1.0000013 1.0000079 ... 1.0000094 1.0000015 1.       ]
INFO:calc_z_likelihood:Eigenvalues for sample covariance marix : shape =  (1024,) , values = [-7.5317011e-08 -2.8629717e-08 -1.3205582e-08 ...  4.2912030e+00
  5.7973919e+00  5.1994446e+01]
INFO:Main:Training autoencoder for label 7 with name horse
Cholesky decomposition was not successful. The input might not be valid. [Op:Cholesky]
Epoch 1/10
157/157 [==============================] - 2s 8ms/step - loss: 0.0451 - val_loss: 0.0246
Epoch 2/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0231 - val_loss: 0.0234
Epoch 3/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0160 - val_loss: 0.0160
Epoch 4/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0117 - val_loss: 0.0098
Epoch 5/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0091 - val_loss: 0.0085
Epoch 6/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0076 - val_loss: 0.0097
Epoch 7/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0074 - val_loss: 0.0072
Epoch 8/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0064 - val_loss: 0.0066
Epoch 9/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0058 - val_loss: 0.0061
Epoch 10/10
157/157 [==============================] - 1s 9ms/step - loss: 0.0057 - val_loss: 0.0057
INFO:Main:Apply PCA for 2D plot of Z
INFO:Main:Z shape of auto_encoder_horse = (1000, 1024)
INFO:fix_non_positive_definite_matrix:Correction error : 0.0
INFO:fix_non_positive_definite_matrix:VV^T after fixing = [1.0000092 1.0000036 1.0000087 ... 1.0000128 1.        1.       ]
INFO:calc_z_likelihood:Eigenvalues for sample covariance marix : shape =  (1024,) , values = [-5.0423114e-07 -8.8779515e-08 -1.9840035e-08 ...  9.1120291e+00
  1.1948152e+01  2.9989197e+01]
INFO:Main:Training autoencoder for label 8 with name ship
Cholesky decomposition was not successful. The input might not be valid. [Op:Cholesky]
Epoch 1/10
157/157 [==============================] - 2s 8ms/step - loss: 0.0363 - val_loss: 0.0218
Epoch 2/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0204 - val_loss: 0.0179
Epoch 3/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0161 - val_loss: 0.0129
Epoch 4/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0131 - val_loss: 0.0120
Epoch 5/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0108 - val_loss: 0.0091
Epoch 6/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0092 - val_loss: 0.0107
Epoch 7/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0082 - val_loss: 0.0104
Epoch 8/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0081 - val_loss: 0.0065
Epoch 9/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0062 - val_loss: 0.0059
Epoch 10/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0060 - val_loss: 0.0059
INFO:Main:Apply PCA for 2D plot of Z
INFO:Main:Z shape of auto_encoder_ship = (1000, 1024)
INFO:fix_non_positive_definite_matrix:Correction error : 0.0
INFO:fix_non_positive_definite_matrix:VV^T after fixing = [1.        1.        1.        ... 1.0000004 1.        1.       ]
INFO:calc_z_likelihood:Eigenvalues for sample covariance marix : shape =  (1024,) , values = [-1.16121846e-07 -3.17381073e-08 -1.15144010e-08 ...  7.83758831e+00
  1.19092188e+01  2.10897598e+01]
INFO:Main:Training autoencoder for label 9 with name truck
Cholesky decomposition was not successful. The input might not be valid. [Op:Cholesky]
Epoch 1/10
157/157 [==============================] - 2s 8ms/step - loss: 0.0477 - val_loss: 0.0311
Epoch 2/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0233 - val_loss: 0.0188
Epoch 3/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0180 - val_loss: 0.0150
Epoch 4/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0140 - val_loss: 0.0123
Epoch 5/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0116 - val_loss: 0.0111
Epoch 6/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0101 - val_loss: 0.0195
Epoch 7/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0104 - val_loss: 0.0094
Epoch 8/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0087 - val_loss: 0.0081
Epoch 9/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0077 - val_loss: 0.0084
Epoch 10/10
157/157 [==============================] - 1s 8ms/step - loss: 0.0069 - val_loss: 0.0068
INFO:Main:Apply PCA for 2D plot of Z
INFO:Main:Z shape of auto_encoder_truck = (1000, 1024)
INFO:fix_non_positive_definite_matrix:Correction error : 0.0
INFO:fix_non_positive_definite_matrix:VV^T after fixing = [1.0000066 1.0000051 1.0000064 ... 1.000006  1.0000086 1.       ]
INFO:calc_z_likelihood:Eigenvalues for sample covariance marix : shape =  (1024,) , values = [-3.4355025e-08 -1.3900106e-08 -9.6917967e-09 ...  9.0126667e+00
  1.0628846e+01  2.9780653e+01]
Cholesky decomposition was not successful. The input might not be valid. [Op:Cholesky]